From aedd1ba753f07d1cecd57d7a517eab921a85acb1 Mon Sep 17 00:00:00 2001
From: "djm@kirby.fc.hp.com"
Date: Fri, 6 May 2005 21:20:51 +0000
Subject: [PATCH] bitkeeper revision 1.1389.9.2 (427bdfb3_b3KQR-vZHyKEmWLaNwEJQ)
 First implementation of hyperprivops (no fast assembly yet)

Signed-off-by: Dan Magenheimer
---
 xen/arch/ia64/asm-offsets.c |  4 ++++
 xen/arch/ia64/ivt.S         | 14 +++++++++++++-
 xen/arch/ia64/privop.c      | 32 ++++++++++++++++++++++++++++++++
 xen/arch/ia64/process.c     |  4 ++++
 xen/arch/ia64/vcpu.c        | 12 ++++++++++++
 5 files changed, 65 insertions(+), 1 deletion(-)

diff --git a/xen/arch/ia64/asm-offsets.c b/xen/arch/ia64/asm-offsets.c
index 73f2500f81..f7b2fbd67e 100644
--- a/xen/arch/ia64/asm-offsets.c
+++ b/xen/arch/ia64/asm-offsets.c
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include

 #define task_struct exec_domain

@@ -37,6 +38,9 @@ void foo(void)

 	BLANK();

+	DEFINE(XSI_PSR_IC_OFS, offsetof(vcpu_info_t, arch.interrupt_collection_enabled));
+	DEFINE(XSI_PSR_IC, (SHAREDINFO_ADDR+offsetof(vcpu_info_t, arch.interrupt_collection_enabled)));
+	DEFINE(XSI_PSR_I_OFS, offsetof(vcpu_info_t, arch.interrupt_delivery_enabled));
 	//DEFINE(IA64_TASK_BLOCKED_OFFSET,offsetof (struct task_struct, blocked));
 	//DEFINE(IA64_TASK_CLEAR_CHILD_TID_OFFSET,offsetof (struct task_struct, clear_child_tid));
 	//DEFINE(IA64_TASK_GROUP_LEADER_OFFSET, offsetof (struct task_struct, group_leader));
diff --git a/xen/arch/ia64/ivt.S b/xen/arch/ia64/ivt.S
index f68e78a834..a44f5eb8a9 100644
--- a/xen/arch/ia64/ivt.S
+++ b/xen/arch/ia64/ivt.S
@@ -778,10 +778,22 @@ ENTRY(break_fault)
 	mov r17=cr.iim
 	mov r31=pr
 	;;
+	movl r18=XSI_PSR_IC
+	;;
+	ld8 r19=[r18]
+	;;
 	cmp.eq p7,p0=r0,r17	// is this a psuedo-cover?
-	// FIXME: may also need to check slot==2?
(p7)	br.sptk.many dispatch_privop_fault
+	;;
+	cmp.ne p7,p0=r0,r19
+(p7)	br.sptk.many dispatch_break_fault
+	// If we get to here, we have a hyperprivop
+	// For now, hyperprivops are handled through the break mechanism
+	// Later, they will be fast hand-coded assembly with psr.ic off
+	// which means no calls, no use of r1-r15 and no memory accesses
+	// except to pinned addresses!
 	br.sptk.many dispatch_break_fault
+	;;
 #endif
 	mov r16=IA64_KR(CURRENT)	// r16 = current task; 12 cycle read lat.
 	mov r17=cr.iim
diff --git a/xen/arch/ia64/privop.c b/xen/arch/ia64/privop.c
index 11ef17310f..c17adc5bde 100644
--- a/xen/arch/ia64/privop.c
+++ b/xen/arch/ia64/privop.c
@@ -758,6 +758,38 @@ priv_emulate(VCPU *vcpu, REGS *regs, UINT64 isr)
 }

+// FIXME: Move these to include/public/arch-ia64?
+#define HYPERPRIVOP_RFI		1
+#define HYPERPRIVOP_RSM_DT	2
+#define HYPERPRIVOP_SSM_DT	3
+#define HYPERPRIVOP_COVER	4
+
+/* hyperprivops are generally executed in assembly (with physical psr.ic off)
+ * so this code is primarily used for debugging them */
+int
+ia64_hyperprivop(unsigned long iim)
+{
+	struct exec_domain *ed = (struct exec_domain *) current;
+
+// FIXME: Add instrumentation for these
+	switch(iim) {
+	    case HYPERPRIVOP_RFI:
+		(void)vcpu_rfi(ed);
+		return 0;	// don't update iip
+	    case HYPERPRIVOP_RSM_DT:
+		(void)vcpu_reset_psr_dt(ed);
+		return 1;
+	    case HYPERPRIVOP_SSM_DT:
+		(void)vcpu_set_psr_dt(ed);
+		return 1;
+	    case HYPERPRIVOP_COVER:
+		(void)vcpu_cover(ed);
+		return 1;
+	}
+	return 0;
+}
+
+
 /**************************************************************************
 Privileged operation instrumentation routines
 **************************************************************************/

diff --git a/xen/arch/ia64/process.c b/xen/arch/ia64/process.c
index 221812123d..27115d1b39 100644
--- a/xen/arch/ia64/process.c
+++ b/xen/arch/ia64/process.c
@@ -722,6 +722,10 @@ ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, u
 		if (ia64_hypercall(regs))
 			vcpu_increment_iip(current);
 	}
+	else if (!PSCB(ed,interrupt_collection_enabled)) {
+		if (ia64_hyperprivop(iim))
+			vcpu_increment_iip(current);
+	}
 	else reflect_interruption(ifa,isr,iim,regs,IA64_BREAK_VECTOR);
 }

diff --git a/xen/arch/ia64/vcpu.c b/xen/arch/ia64/vcpu.c
index f5c3f458b3..f07d5ab788 100644
--- a/xen/arch/ia64/vcpu.c
+++ b/xen/arch/ia64/vcpu.c
@@ -120,6 +120,12 @@ void vcpu_set_metaphysical_mode(VCPU *vcpu, BOOLEAN newmode)
 	}
 }

+IA64FAULT vcpu_reset_psr_dt(VCPU *vcpu)
+{
+	vcpu_set_metaphysical_mode(vcpu,TRUE);
+	return IA64_NO_FAULT;
+}
+
 IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
 {
 	struct ia64_psr psr, imm, *ipsr;
@@ -154,6 +160,12 @@ IA64FAULT vcpu_reset_psr_sm(VCPU *vcpu, UINT64 imm24)
 extern UINT64 vcpu_check_pending_interrupts(VCPU *vcpu);
 #define SPURIOUS_VECTOR 0xf

+IA64FAULT vcpu_set_psr_dt(VCPU *vcpu)
+{
+	vcpu_set_metaphysical_mode(vcpu,FALSE);
+	return IA64_NO_FAULT;
+}
+
 IA64FAULT vcpu_set_psr_sm(VCPU *vcpu, UINT64 imm24)
 {
 	struct ia64_psr psr, imm, *ipsr;
-- 
2.30.2